
Commit b671a4b
Merge branch 'dev' into peertube
B0pol authored Feb 1, 2020
2 parents e392b6c + 62b81c3 commit b671a4b
Showing 11 changed files with 373 additions and 30 deletions.
org/schabi/newpipe/extractor/StreamingService.java
@@ -7,6 +7,7 @@
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.feed.FeedExtractor;
import org.schabi.newpipe.extractor.kiosk.KioskList;
import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
@@ -24,6 +25,8 @@
import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor;
import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor;

import javax.annotation.Nullable;

/*
* Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
* StreamingService.java is part of NewPipe.
@@ -65,7 +68,7 @@ public ServiceInfo(String name, List<MediaCapability> mediaCapabilities) {
public String getName() {
return name;
}

public List<MediaCapability> getMediaCapabilities() {
return mediaCapabilities;
}
@@ -116,7 +119,7 @@ public ServiceInfo getServiceInfo() {
public String toString() {
return serviceId + ":" + serviceInfo.getName();
}

public abstract String getBaseUrl();

/*//////////////////////////////////////////////////////////////////////////
@@ -173,6 +176,19 @@ public String toString() {
*/
public abstract SubscriptionExtractor getSubscriptionExtractor();

/**
* This method decides which strategy will be chosen to fetch the feed. YouTube, for example, provides a separate,
* lightweight feed made specifically for this purpose.
* <p>
* In services where there is no other way to retrieve feed items, null should be returned.
*
* @return a {@link FeedExtractor} instance or null.
*/
@Nullable
public FeedExtractor getFeedExtractor(String url) throws ExtractionException {
return null;
}
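For callers, the intended pattern is to try the dedicated feed first and fall back to regular channel extraction when this returns null. A hypothetical helper sketching that pattern (not part of this commit; FeedInfo is the companion class added further down in this diff):

static ListInfo<StreamInfoItem> fetchFeedOrChannel(final String channelUrl) throws IOException, ExtractionException {
    final StreamingService service = NewPipe.getServiceByUrl(channelUrl);
    if (service.getFeedExtractor(channelUrl) == null) {
        // No lightweight feed for this service: fall back to full channel extraction.
        return ChannelInfo.getInfo(channelUrl);
    }
    // A dedicated feed exists (e.g. YouTube), so only the feed is fetched.
    return FeedInfo.getInfo(service, channelUrl);
}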

/**
* Must create a new instance of a KioskList implementation.
* @return a new KioskList instance
@@ -258,7 +274,7 @@ public CommentsExtractor getCommentsExtractor(String url) throws ExtractionExcep
}
return getCommentsExtractor(llhf.fromUrl(url));
}

/*//////////////////////////////////////////////////////////////////////////
// Utils
//////////////////////////////////////////////////////////////////////////*/
org/schabi/newpipe/extractor/channel/ChannelInfo.java
@@ -5,9 +5,7 @@
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.utils.ExtractorHelper;

@@ -35,8 +33,8 @@

public class ChannelInfo extends ListInfo<StreamInfoItem> {

public ChannelInfo(int serviceId, ListLinkHandler linkHandler, String name) throws ParsingException {
super(serviceId, linkHandler, name);
public ChannelInfo(int serviceId, String id, String url, String originalUrl, String name, ListLinkHandler listLinkHandler) {
super(serviceId, id, url, originalUrl, name, listLinkHandler.getContentFilters(), listLinkHandler.getSortFilter());
}

public static ChannelInfo getInfo(String url) throws IOException, ExtractionException {
@@ -57,15 +55,14 @@ public static InfoItemsPage<StreamInfoItem> getMoreItems(StreamingService servic

public static ChannelInfo getInfo(ChannelExtractor extractor) throws IOException, ExtractionException {

ChannelInfo info = new ChannelInfo(extractor.getServiceId(),
extractor.getLinkHandler(),
extractor.getName());
final int serviceId = extractor.getServiceId();
final String id = extractor.getId();
final String url = extractor.getUrl();
final String originalUrl = extractor.getOriginalUrl();
final String name = extractor.getName();

final ChannelInfo info = new ChannelInfo(serviceId, id, url, originalUrl, name, extractor.getLinkHandler());

try {
info.setOriginalUrl(extractor.getOriginalUrl());
} catch (Exception e) {
info.addError(e);
}
try {
info.setAvatarUrl(extractor.getAvatarUrl());
} catch (Exception e) {
org/schabi/newpipe/extractor/feed/FeedExtractor.java (new file)
@@ -0,0 +1,17 @@
package org.schabi.newpipe.extractor.feed;

import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;

/**
* This class helps extract items from the lightweight feeds that services may provide.
* <p>
* YouTube is an example of a service that has this alternative available.
*/
public abstract class FeedExtractor extends ListExtractor<StreamInfoItem> {
public FeedExtractor(StreamingService service, ListLinkHandler listLinkHandler) {
super(service, listLinkHandler);
}
}
org/schabi/newpipe/extractor/feed/FeedInfo.java (new file)
@@ -0,0 +1,52 @@
package org.schabi.newpipe.extractor.feed;

import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.utils.ExtractorHelper;

import java.io.IOException;
import java.util.List;

public class FeedInfo extends ListInfo<StreamInfoItem> {

public FeedInfo(int serviceId, String id, String url, String originalUrl, String name, List<String> contentFilter, String sortFilter) {
super(serviceId, id, url, originalUrl, name, contentFilter, sortFilter);
}

public static FeedInfo getInfo(String url) throws IOException, ExtractionException {
return getInfo(NewPipe.getServiceByUrl(url), url);
}

public static FeedInfo getInfo(StreamingService service, String url) throws IOException, ExtractionException {
final FeedExtractor extractor = service.getFeedExtractor(url);

if (extractor == null) {
throw new IllegalArgumentException("Service \"" + service.getServiceInfo().getName() + "\" doesn't support FeedExtractor.");
}

extractor.fetchPage();
return getInfo(extractor);
}

public static FeedInfo getInfo(FeedExtractor extractor) throws IOException, ExtractionException {
extractor.fetchPage();

final int serviceId = extractor.getServiceId();
final String id = extractor.getId();
final String url = extractor.getUrl();
final String originalUrl = extractor.getOriginalUrl();
final String name = extractor.getName();

final FeedInfo info = new FeedInfo(serviceId, id, url, originalUrl, name, null, null);

final InfoItemsPage<StreamInfoItem> itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setRelatedItems(itemsPage.getItems());
info.setNextPageUrl(itemsPage.getNextPageUrl());

return info;
}
}
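Typical use of the new class, for reference (placeholder URL, exception handling omitted; getRelatedItems() is the usual ListInfo accessor for the collected items):

final FeedInfo feedInfo = FeedInfo.getInfo("https://www.youtube.com/channel/<channelId>");
for (final StreamInfoItem item : feedInfo.getRelatedItems()) {
    // Streams collected from the channel's lightweight feed.
    System.out.println(item.getName() + " - " + item.getUrl());
}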
org/schabi/newpipe/extractor/services/youtube/YoutubeService.java
@@ -12,6 +12,7 @@
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.feed.FeedExtractor;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.kiosk.KioskList;
import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
@@ -24,14 +25,7 @@
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeChannelExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeCommentsExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubePlaylistExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSearchExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeStreamExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSubscriptionExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeSuggestionExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeTrendingExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.*;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeChannelLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeCommentsLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubePlaylistLinkHandlerFactory;
@@ -42,6 +36,8 @@
import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor;
import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor;

import javax.annotation.Nonnull;

/*
* Created by Christian Schabesberger on 23.08.15.
*
@@ -72,7 +68,7 @@ public YoutubeService(int id) {
public String getBaseUrl() {
return "https://youtube.com";
}

@Override
public LinkHandlerFactory getStreamLHFactory() {
return YoutubeStreamLinkHandlerFactory.getInstance();
@@ -147,6 +143,12 @@ public SubscriptionExtractor getSubscriptionExtractor() {
return new YoutubeSubscriptionExtractor(this);
}

@Nonnull
@Override
public FeedExtractor getFeedExtractor(final String channelUrl) throws ExtractionException {
return new YoutubeFeedExtractor(this, getChannelLHFactory().fromUrl(channelUrl));
}
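Because the handler is built via getChannelLHFactory().fromUrl(channelUrl), this override accepts the same channel URLs as the regular channel extractor. A minimal sketch, assuming the usual ServiceList.YouTube accessor (not shown in this diff):

final FeedExtractor feedExtractor = ServiceList.YouTube.getFeedExtractor("https://www.youtube.com/channel/<channelId>");
feedExtractor.fetchPage();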

@Override
public ListLinkHandlerFactory getCommentsLHFactory() {
return YoutubeCommentsLinkHandlerFactory.getInstance();
org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeChannelExtractor.java
@@ -46,7 +46,6 @@
@SuppressWarnings("WeakerAccess")
public class YoutubeChannelExtractor extends ChannelExtractor {
/*package-private*/ static final String CHANNEL_URL_BASE = "https://www.youtube.com/channel/";
private static final String CHANNEL_FEED_BASE = "https://www.youtube.com/feeds/videos.xml?channel_id=";
private static final String CHANNEL_URL_PARAMETERS = "/videos?view=0&flow=list&sort=dd&live_view=10000";

private Document doc;
@@ -130,7 +129,7 @@ public String getBannerUrl() throws ParsingException {
@Override
public String getFeedUrl() throws ParsingException {
try {
return CHANNEL_FEED_BASE + getId();
return YoutubeParsingHelper.getFeedUrlFrom(getId());
} catch (Exception e) {
throw new ParsingException("Could not get feed url", e);
}
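YoutubeParsingHelper.getFeedUrlFrom is added in another file of this commit and its body is not shown here. Judging from the CHANNEL_FEED_BASE constant it replaces, it presumably builds the same Atom feed URL; a rough sketch (the real helper may differ, for example for user-name based channels):

// Hypothetical reconstruction, based on the removed CHANNEL_FEED_BASE constant.
public static String getFeedUrlFrom(final String channelIdOrUser) {
    return "https://www.youtube.com/feeds/videos.xml?channel_id=" + channelIdOrUser;
}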
org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeFeedExtractor.java (new file)
@@ -0,0 +1,82 @@
package org.schabi.newpipe.extractor.services.youtube.extractors;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.feed.FeedExtractor;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;

import javax.annotation.Nonnull;
import java.io.IOException;

public class YoutubeFeedExtractor extends FeedExtractor {
public YoutubeFeedExtractor(StreamingService service, ListLinkHandler linkHandler) {
super(service, linkHandler);
}

private Document document;

@Override
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
final String channelIdOrUser = getLinkHandler().getId();
final String feedUrl = YoutubeParsingHelper.getFeedUrlFrom(channelIdOrUser);

final Response response = downloader.get(feedUrl);
document = Jsoup.parse(response.responseBody());
}

@Nonnull
@Override
public ListExtractor.InfoItemsPage<StreamInfoItem> getInitialPage() {
final Elements entries = document.select("feed > entry");
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());

for (Element entryElement : entries) {
collector.commit(new YoutubeFeedInfoItemExtractor(entryElement));
}

return new InfoItemsPage<>(collector, null);
}

@Nonnull
@Override
public String getId() {
return document.getElementsByTag("yt:channelId").first().text();
}

@Nonnull
@Override
public String getUrl() {
return document.select("feed > author > uri").first().text();
}

@Nonnull
@Override
public String getName() {
return document.select("feed > author > name").first().text();
}

@Override
public String getNextPageUrl() {
return null;
}

@Override
public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) {
return null;
}

@Override
public boolean hasNextPage() {
return false;
}
}
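The selectors above ("feed > entry", "yt:channelId", "feed > author > uri", "feed > author > name") read YouTube's Atom feed, so the whole path can be exercised roughly like this (placeholder URL, exceptions omitted):

final StreamingService youtube = NewPipe.getServiceByUrl("https://www.youtube.com/channel/<channelId>");
final FeedExtractor extractor = youtube.getFeedExtractor("https://www.youtube.com/channel/<channelId>");
extractor.fetchPage();
for (final StreamInfoItem item : extractor.getInitialPage().getItems()) {
    // Each <entry> of the feed is turned into a StreamInfoItem by YoutubeFeedInfoItemExtractor.
    System.out.println(item.getName());
}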
